type runtime.p
57 uses
runtime (current package)
mgc.go#L896: forEachP(waitReasonGCMarkTermination, func(pp *p) {
mgc.go#L1213: forEachP(waitReasonFlushProcCaches, func(pp *p) {
mgc.go#L1568: func gcMarkWorkAvailable(p *p) bool {
mgcpacer.go#L731: func (c *gcControllerState) findRunnableGCWorker(pp *p, now int64) (*g, int64) {
mgcpacer.go#L894: func (c *gcControllerState) addScannableStack(pp *p, amount int64) {
mwbbuf.go#L195: func wbBufFlush1(pp *p) {
proc.go#L2033: func forEachP(reason waitReason, fn func(*p)) {
proc.go#L2067: func forEachPInternal(fn func(*p)) {
proc.go#L2203: func allocm(pp *p, fn func(), id int64) *m {
proc.go#L2777: func newm(fn func(), pp *p, id int64) {
proc.go#L2955: func startm(pp *p, spinning, lockheld bool) {
proc.go#L3051: func handoffp(pp *p) {
proc.go#L3146: var pp *p
proc.go#L3771: func checkRunqsNoP(allpSnapshot []*p, idlepMaskSnapshot pMask) *p {
proc.go#L3793: func checkTimersNoP(allpSnapshot []*p, timerpMaskSnapshot pMask, pollUntil int64) int64 {
proc.go#L3810: func checkIdleGCNoP() (*p, *g) {
proc.go#L4770: func exitsyscallfast(oldp *p) bool {
proc.go#L4864: var pp *p
proc.go#L5199: func gfput(pp *p, gp *g) {
proc.go#L5242: func gfget(pp *p) *g {
proc.go#L5300: func gfpurge(pp *p) {
proc.go#L5577: var pp *p
proc.go#L5630: func (pp *p) init(id int32) {
proc.go#L5670: func (pp *p) destroy() {
proc.go#L5748: func procresize(nprocs int32) *p {
proc.go#L5779: nallp := make([]*p, nprocs)
proc.go#L5806: pp = new(p)
proc.go#L5866: var runnablePs *p
proc.go#L5897: func acquirep(pp *p) {
proc.go#L5920: func wirep(pp *p) {
proc.go#L5948: func releasep() *p {
proc.go#L5958: func releasepNoTrace() *p {
proc.go#L6371: func preemptone(pp *p) bool {
proc.go#L6594: func globrunqget(pp *p, max int32) *g {
proc.go#L6658: func pidleput(pp *p, now int64) int64 {
proc.go#L6687: func pidleget(now int64) (*p, int64) {
proc.go#L6715: func pidlegetSpinning(now int64) (*p, int64) {
proc.go#L6732: func runqempty(pp *p) bool {
proc.go#L6763: func runqput(pp *p, gp *g, next bool) {
proc.go#L6809: func runqputslow(pp *p, gp *g, h, t uint32) bool {
proc.go#L6852: func runqputbatch(pp *p, q *gQueue, qsize int) {
proc.go#L6886: func runqget(pp *p) (gp *g, inheritTime bool) {
proc.go#L6911: func runqdrain(pp *p) (drainQ gQueue, n uint32) {
proc.go#L6952: func runqgrab(pp *p, batch *[256]guintptr, batchHead uint32, stealRunNextG bool) uint32 {
proc.go#L7007: func runqsteal(pp, p2 *p, stealRunNextG bool) *g {
runtime2.go#L267: func (pp puintptr) ptr() *p { return (*p)(unsafe.Pointer(pp)) }
runtime2.go#L270: func (pp *puintptr) set(p *p) { *pp = puintptr(unsafe.Pointer(p)) }
runtime2.go#L632: type p struct {
runtime2.go#L827: safePointFn func(*p)
runtime2.go#L1209: allp []*p
trace.go#L639: forEachP(waitReasonTraceProcStatus, func(pp *p) {
tracecpu.go#L211: func traceCPUSample(gp *g, mp *m, pp *p, stk []uintptr) {
traceruntime.go#L301: func (tl traceLocker) ProcStop(pp *p) {
traceruntime.go#L537: func (tl traceLocker) ProcSteal(pp *p, inSyscall bool) {
tracestatus.go#L84: func (w traceWriter) writeProcStatusForP(pp *p, inSTW bool) traceWriter {
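
Most of the references above cluster around the scheduler's per-P run queue (runqput, runqget, runqputslow, runqsteal, globrunqget). The sketch below is a simplified, single-threaded model of that structure, not the runtime's actual implementation: the names proc, task, and globalQueue are invented for illustration, and the real code operates lock-free on guintptr values with atomic head/tail updates.

// Simplified, illustrative model of the per-P run queue shape that the
// listed runtime functions operate on. Hypothetical names; not runtime code.
package main

import "fmt"

// task stands in for the runtime's g (goroutine descriptor).
type task struct{ id int }

// globalQueue stands in for the global runnable queue that overflow from a
// local queue spills into.
type globalQueue struct{ tasks []*task }

// proc is a simplified model of runtime.p: a fixed-size ring buffer of
// runnable tasks plus a single "runnext" slot for the most recently
// readied task.
type proc struct {
	runq               [256]*task
	runqhead, runqtail uint32 // tail-head <= len(runq)
	runnext            *task
}

// runqput mimics the shape of runtime.runqput: with next=true the task takes
// the runnext slot (kicking any previous occupant into the ring); otherwise
// it goes to the tail of the ring, spilling half the queue to the global
// queue when the ring is full (roughly what runqputslow does).
func (pp *proc) runqput(gq *globalQueue, t *task, next bool) {
	if next {
		t, pp.runnext = pp.runnext, t
		if t == nil {
			return
		}
		// The displaced runnext task falls through into the ring.
	}
	if pp.runqtail-pp.runqhead < uint32(len(pp.runq)) {
		pp.runq[pp.runqtail%uint32(len(pp.runq))] = t
		pp.runqtail++
		return
	}
	// Local queue is full: move half of it, plus t, to the global queue.
	n := (pp.runqtail - pp.runqhead) / 2
	for i := uint32(0); i < n; i++ {
		gq.tasks = append(gq.tasks, pp.runq[pp.runqhead%uint32(len(pp.runq))])
		pp.runqhead++
	}
	gq.tasks = append(gq.tasks, t)
}

// runqget mimics runtime.runqget: prefer runnext, then the head of the ring.
func (pp *proc) runqget() *task {
	if t := pp.runnext; t != nil {
		pp.runnext = nil
		return t
	}
	if pp.runqhead == pp.runqtail {
		return nil // local queue empty
	}
	t := pp.runq[pp.runqhead%uint32(len(pp.runq))]
	pp.runqhead++
	return t
}

func main() {
	pp := &proc{}
	gq := &globalQueue{}
	for i := 0; i < 3; i++ {
		pp.runqput(gq, &task{id: i}, false)
	}
	pp.runqput(gq, &task{id: 99}, true) // 99 takes the runnext slot
	for t := pp.runqget(); t != nil; t = pp.runqget() {
		fmt.Println("run task", t.id)
	}
}

In the real runtime the runnext slot exists for locality: a goroutine taken from it inherits the current time slice, which is the inheritTime result visible in the runqget signature at proc.go#L6886 above.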